#include <xeno/errno.h>
#include <xeno/perfc.h>
#include <xeno/interrupt.h>
+#include <xeno/shadow.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/io.h>
struct task_struct *subject_p;
} percpu_info[NR_CPUS] __cacheline_aligned;
+
/*
* init_frametable:
* Initialise per-frame memory information. This goes directly after
default:
BUG();
}
+
+#ifdef CONFIG_SHADOW
+    // PSH_shadowed set implies shadow mode: this table has a live
+    // shadow which must be destroyed along with it
+    if ( page->shadow_and_flags & PSH_shadowed )
+        unshadow_table( page - frame_table );
+#endif
+
}
put_page_and_type(&frame_table[pagetable_val(current->mm.pagetable)
>> PAGE_SHIFT]);
current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+#ifdef CONFIG_SHADOW
+ current->mm.shadowtable =
+ shadow_mk_pagetable(pfn << PAGE_SHIFT, current->mm.shadowmode);
+#endif
invalidate_shadow_ldt();
percpu_info[cpu].deferred_ops |= DOP_FLUSH_TLB;
}
struct pfn_info *page;
int rc = 0, okay = 1, i, cpu = smp_processor_id();
unsigned int cmd;
+#ifdef CONFIG_SHADOW
+ unsigned long prev_spfn = 0;
+ l1_pgentry_t *prev_spl1e = 0;
+#endif
perfc_incrc(calls_to_mmu_update);
perfc_addc(num_page_updates, count);
{
okay = mod_l1_entry((l1_pgentry_t *)va,
mk_l1_pgentry(req.val));
+
+#ifdef CONFIG_SHADOW
+                if ( okay && (page->shadow_and_flags & PSH_shadowed) )
+ shadow_l1_normal_pt_update( req.ptr, req.val,
+ &prev_spfn, &prev_spl1e );
+#endif
+
put_page_type(page);
}
break;
okay = mod_l2_entry((l2_pgentry_t *)va,
mk_l2_pgentry(req.val),
pfn);
+#ifdef CONFIG_SHADOW
+                if ( okay && (page->shadow_and_flags & PSH_shadowed) )
+ shadow_l2_normal_pt_update( req.ptr, req.val );
+#endif
+
put_page_type(page);
}
break;
*(unsigned long *)va = req.val;
okay = 1;
put_page_type(page);
+
+#ifdef CONFIG_SHADOW
+            // at present, we shouldn't be shadowing such pages
+            if ( page->shadow_and_flags & PSH_shadowed )
+                BUG();
+#endif
+
}
break;
}
+
+#ifdef CONFIG_SHADOW
+    check_pagetable( current->mm.pagetable, "mmu" ); // XXX debug audit -- remove
+#endif
put_page(page);
if ( prev_pfn != 0 )
unmap_domain_mem((void *)va);
+#ifdef CONFIG_SHADOW
+    if ( prev_spl1e != 0 )
+        unmap_domain_mem( (void *)prev_spl1e );
+#endif
+
deferred_ops = percpu_info[cpu].deferred_ops;
percpu_info[cpu].deferred_ops = 0;
if ( deferred_ops & DOP_FLUSH_TLB )
- write_cr3_counted(pagetable_val(current->mm.pagetable));
+ {
+#ifdef CONFIG_SHADOW
+ if ( unlikely(current->mm.shadowmode) )
+ write_cr3_counted(pagetable_val(current->mm.shadowtable));
+ else
+#endif
+ write_cr3_counted(pagetable_val(current->mm.pagetable));
+ }
if ( deferred_ops & DOP_RELOAD_LDT )
(void)map_ldt_shadow_page(0);
unsigned int cpu = p->processor;
unsigned long deferred_ops;
+ perfc_incrc(calls_to_update_va);
+
if ( unlikely(page_nr >= (HYPERVISOR_VIRT_START >> PAGE_SHIFT)) )
return -EINVAL;
+ // XXX when we make this support 4MB pages we should also
+ // deal with the case of updating L2s
+
if ( unlikely(!mod_l1_entry(&linear_pg_table[page_nr],
mk_l1_pgentry(val))) )
err = -EINVAL;
+#ifdef CONFIG_SHADOW
+
+ if ( unlikely(p->mm.shadowmode) )
+ {
+ unsigned long sval = 0;
+
+ // XXX this only works for l1 entries, with no translation
+
+ if ( (val & _PAGE_PRESENT) && (val & _PAGE_ACCESSED) )
+ {
+ sval = val;
+ if ( !(val & _PAGE_DIRTY) )
+ sval &= ~_PAGE_RW;
+ }
+
+ /* printk("update_va_map: page_nr=%08lx val =%08lx sval =%08lx\n",
+ page_nr, val, sval);*/
+
+        if ( __put_user( sval, (unsigned long *)
+                         &shadow_linear_pg_table[page_nr] ) )
+        {
+            // Since L2s are guaranteed RW, failure indicates the page
+            // was not shadowed, so ignore.
+
+            //MEM_LOG("update_va_map: couldn't write update\n");
+        }
+ }
+
+    check_pagetable( p->mm.pagetable, "va" ); // XXX debug audit -- remove
+
+#endif
+
deferred_ops = percpu_info[cpu].deferred_ops;
percpu_info[cpu].deferred_ops = 0;
if ( unlikely(deferred_ops & DOP_FLUSH_TLB) ||
unlikely(flags & UVMF_FLUSH_TLB) )
- write_cr3_counted(pagetable_val(p->mm.pagetable));
+ {
+#ifdef CONFIG_SHADOW
+ if ( unlikely(p->mm.shadowmode) )
+ write_cr3_counted(pagetable_val(p->mm.shadowtable));
+ else
+#endif
+ write_cr3_counted(pagetable_val(p->mm.pagetable));
+ }
else if ( unlikely(flags & UVMF_INVLPG) )
__flush_tlb_one(page_nr << PAGE_SHIFT);
--- /dev/null
+/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- */
+
+#include <xeno/config.h>
+#include <xeno/types.h>
+#include <xeno/lib.h>
+#include <xeno/mm.h>
+#include <xeno/perfc.h>
+#include <xeno/shadow.h>
+#include <asm/domain_page.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_SHADOW
+
+
+#if 1
+#define MEM_VLOG(_f, _a...) \
+ printk("DOM%llu: (file=shadow.c, line=%d) " _f "\n", \
+ current->domain , __LINE__ , ## _a )
+#else
+#define MEM_VLOG(_f, _a...)
+#endif
+
+#if 0
+#define MEM_VVLOG(_f, _a...) \
+ printk("DOM%llu: (file=shadow.c, line=%d) " _f "\n", \
+ current->domain , __LINE__ , ## _a )
+#else
+#define MEM_VVLOG(_f, _a...)
+#endif
+
+
+/********
+
+To use these shadow page tables, guests must not rely on the ACCESSED
+and DIRTY bits of L2 PTEs being accurate -- they will typically all be set.
+
+I doubt this will break anything. (If guests want to use the va_update
+mechanism they've signed up for this anyhow...)
+
+********/
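+
+/*
+ * For reference, the gpte -> spte propagation rule that shadow_fault(),
+ * the update_va_map path in mm.c, and the *_normal_pt_update() paths
+ * below all open-code is, in essence, the following. (Illustrative
+ * sketch only -- this hypothetical helper is not used in this file.)
+ */
+#if 0
+static inline unsigned long shadow_propagate_pte( unsigned long gpte )
+{
+    unsigned long spte = 0;
+
+    // only propagate entries the guest has actually used
+    if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
+         (_PAGE_PRESENT|_PAGE_ACCESSED) )
+    {
+        spte = gpte;
+        // write-protect clean pages so the first write faults and we
+        // can set the guest's DIRTY bit for it
+        if ( !(gpte & _PAGE_DIRTY) )
+            spte &= ~_PAGE_RW;
+    }
+    return spte;
+}
+#endif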
+
+
+pagetable_t shadow_mk_pagetable( unsigned long gptbase,
+ unsigned int shadowmode )
+{
+ unsigned long gpfn, spfn=0;
+
+ MEM_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
+ gptbase, shadowmode );
+
+ if ( unlikely(shadowmode) )
+ {
+ gpfn = gptbase >> PAGE_SHIFT;
+
+ if ( likely(frame_table[gpfn].shadow_and_flags & PSH_shadowed) )
+ {
+ spfn = frame_table[gpfn].shadow_and_flags & PSH_pfn_mask;
+ }
+ else
+ {
+ spfn = shadow_l2_table( gpfn );
+ }
+ }
+
+ return mk_pagetable(spfn << PAGE_SHIFT);
+}
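+
+/*
+ * Typical use, from the pagetable-base switch in mm.c: the caller keeps
+ * mm.shadowtable alongside mm.pagetable and loads whichever is
+ * appropriate into cr3:
+ *
+ *     current->mm.shadowtable =
+ *         shadow_mk_pagetable(pfn << PAGE_SHIFT, current->mm.shadowmode);
+ *
+ * When shadowmode is off this degenerates to mk_pagetable(0).
+ */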
+
+void unshadow_table( unsigned long gpfn )
+{
+ unsigned long spfn;
+
+MEM_VLOG("unshadow_table %08lx\n", gpfn );
+
+ perfc_incrc(unshadow_table_count);
+
+ // this function is the same for both l1 and l2 tables
+
+ // even in the SMP guest case, there won't be a race here as
+ // this CPU was the one that cmpxchg'ed the page to invalid
+
+ spfn = frame_table[gpfn].shadow_and_flags & PSH_pfn_mask;
+    frame_table[gpfn].shadow_and_flags = 0;
+    frame_table[spfn].shadow_and_flags = 0;
+
+#ifdef DEBUG
+ { // XXX delete me!
+ int i;
+ unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
+
+ for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
+ {
+            spl1e[i] = 0xdeadface; // poison; must match check_pte()'s whitelist
+ }
+ unmap_domain_mem( spl1e );
+ }
+#endif
+
+ free_domain_page( &frame_table[spfn] );
+}
+
+
+unsigned long shadow_l2_table( unsigned long gpfn )
+{
+ struct pfn_info *spfn_info;
+ unsigned long spfn;
+ l2_pgentry_t *spl2e, *gpl2e;
+ int i;
+
+ MEM_VVLOG("shadow_l2_table( %08lx )",gpfn);
+
+ perfc_incrc(shadow_l2_table_count);
+
+ // XXX in future, worry about racing in SMP guests
+ // -- use cmpxchg with PSH_pending flag to show progress (and spin)
+
+ spfn_info = alloc_domain_page( NULL ); // XXX account properly later
+
+ ASSERT( spfn_info ); // XXX deal with failure later e.g. blow cache
+
+ spfn = (unsigned long) (spfn_info - frame_table);
+
+ // mark pfn as being shadowed, update field to point at shadow
+ frame_table[gpfn].shadow_and_flags = spfn | PSH_shadowed;
+
+ // mark shadow pfn as being a shadow, update field to point at pfn
+ frame_table[spfn].shadow_and_flags = gpfn | PSH_shadow;
+
+ // we need to do this before the linear map is set up
+ spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
+
+    // get hypervisor and 2x linear PT mappings installed
+ memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
+ &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
+ HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
+ spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry((gpfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(__pa(frame_table[gpfn].u.domain->mm.perdomain_pt) |
+ __PAGE_HYPERVISOR);
+
+ // can't use the linear map as we may not be in the right PT
+ gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
+
+ // proactively create entries for pages that are already shadowed
+ for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
+ {
+ unsigned long spte = 0;
+
+#if 0 // Turns out this doesn't really help
+ unsigned long gpte;
+
+ gpte = l2_pgentry_val(gpl2e[i]);
+
+ if (gpte & _PAGE_PRESENT)
+ {
+ unsigned long s_sh =
+ frame_table[ gpte>>PAGE_SHIFT ].shadow_and_flags;
+
+ if( s_sh & PSH_shadowed ) // PSH_shadowed
+ {
+ if ( unlikely( (frame_table[gpte>>PAGE_SHIFT].type_and_flags & PGT_type_mask) == PGT_l2_page_table) )
+ {
+ printk("Linear mapping detected\n");
+ spte = gpte & ~_PAGE_RW;
+ }
+ else
+ {
+ spte = ( gpte & ~PAGE_MASK ) | (s_sh<<PAGE_SHIFT) |
+ _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED ;
+ }
+ // XXX should probably update guest to ACCESSED|DIRTY too...
+
+ }
+
+ }
+#endif
+
+ spl2e[i] = mk_l2_pgentry( spte );
+
+ }
+
+    // It's arguable we should 'preemptively shadow' a few active L1 pages
+    // to avoid taking a string of faults when 'jacking' a running domain.
+
+ unmap_domain_mem( gpl2e );
+ unmap_domain_mem( spl2e );
+
+ MEM_VLOG("shadow_l2_table( %08lx -> %08lx)",gpfn,spfn);
+
+
+ return spfn;
+}
+
+
+int shadow_fault( unsigned long va, long error_code )
+{
+ unsigned long gpte, spte;
+
+ MEM_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
+
+ if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
+ {
+ MEM_VVLOG("shadow_fault - EXIT: read gpte faulted" );
+ return 0; // propagate to guest
+ }
+
+ if ( ! (gpte & _PAGE_PRESENT) )
+ {
+ MEM_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
+ return 0; // we're not going to be able to help
+ }
+
+ spte = gpte;
+
+ if ( error_code & 2 )
+ { // write fault
+ if ( gpte & _PAGE_RW )
+ {
+ gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
+ spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+ // (we're about to dirty it anyhow...)
+ }
+ else
+ { // write fault on RO page
+ MEM_VVLOG("shadow_fault - EXIT: write fault on RO page (%lx)",gpte );
+ return 0; // propagate to guest
+ // not clear whether we should set accessed bit here...
+ }
+ }
+ else
+ {
+ gpte |= _PAGE_ACCESSED;
+ spte |= _PAGE_ACCESSED; // about to happen anyway
+ if ( ! (gpte & _PAGE_DIRTY) )
+ spte &= ~_PAGE_RW; // force clear unless already dirty
+ }
+
+ MEM_VVLOG("plan: gpte=%08lx spte=%08lx", gpte, spte );
+
+ // write back updated gpte
+ // XXX watch out for read-only L2 entries! (not used in Linux)
+ if ( unlikely( __put_user( gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
+ BUG(); // fixme!
+
+ if ( unlikely( __put_user( spte, (unsigned long*)&shadow_linear_pg_table[va>>PAGE_SHIFT])) )
+ {
+ // failed:
+ // the L1 may not be shadowed, or the L2 entry may be insufficient
+
+ unsigned long gpde, spde, gl1pfn, sl1pfn;
+
+ MEM_VVLOG("3: not shadowed or l2 insufficient gpte=%08lx spte=%08lx",gpte,spte );
+
+ gpde = l2_pgentry_val(linear_l2_table[va>>L2_PAGETABLE_SHIFT]);
+
+ gl1pfn = gpde>>PAGE_SHIFT;
+
+ if ( ! (frame_table[gl1pfn].shadow_and_flags & PSH_shadowed ) )
+ {
+ // this L1 is NOT already shadowed so we need to shadow it
+ struct pfn_info *sl1pfn_info;
+ unsigned long *gpl1e, *spl1e;
+ int i;
+            sl1pfn_info = alloc_domain_page( NULL ); // XXX account properly!
+            ASSERT( sl1pfn_info ); // XXX handle failure later, as in shadow_l2_table()
+            sl1pfn = sl1pfn_info - frame_table;
+
+ MEM_VVLOG("4a: l1 not shadowed ( %08lx )",sl1pfn);
+ perfc_incrc(shadow_l1_table_count);
+
+ sl1pfn_info->shadow_and_flags = PSH_shadow | gl1pfn;
+ frame_table[gl1pfn].shadow_and_flags = PSH_shadowed | sl1pfn;
+
+ gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
+ spde = (gpde & ~PAGE_MASK) | _PAGE_RW | (sl1pfn<<PAGE_SHIFT);
+
+
+ linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(gpde);
+ shadow_linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(spde);
+
+ gpl1e = (unsigned long *) &(linear_pg_table[
+ (va>>PAGE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1) ]);
+
+ spl1e = (unsigned long *) &shadow_linear_pg_table[
+ (va>>PAGE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1) ];
+
+
+            // XXX can only do this if the shadow/guest is writeable
+            // disable write protection if !(gpde & _PAGE_RW) ????
+
+ for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
+ {
+#if SHADOW_OPTIMISE
+ if ( (gpl1e[i] & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+ (_PAGE_PRESENT|_PAGE_ACCESSED) )
+ {
+ spl1e[i] = gpl1e[i];
+ if ( !(gpl1e[i] & _PAGE_DIRTY) )
+ spl1e[i] &= ~_PAGE_RW;
+ }
+ else
+#endif
+ spl1e[i] = 0;
+ }
+
+
+ }
+ else
+ {
+ // this L1 was shadowed (by another PT) but we didn't have an L2
+ // entry for it
+
+ sl1pfn = frame_table[gl1pfn].shadow_and_flags & PSH_pfn_mask;
+
+ MEM_VVLOG("4b: was shadowed, l2 missing ( %08lx )",sl1pfn);
+
+ spde = (gpde & ~PAGE_MASK) | (sl1pfn<<PAGE_SHIFT) | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
+
+ gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
+
+
+ if ( unlikely( (sl1pfn<<PAGE_SHIFT) == (gl1pfn<<PAGE_SHIFT) ) )
+ { // detect linear map, and keep pointing at guest
+ MEM_VLOG("4c: linear mapping ( %08lx )",sl1pfn);
+ spde = (spde & ~PAGE_MASK) | (gl1pfn<<PAGE_SHIFT);
+ }
+
+ linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(gpde);
+ shadow_linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(spde);
+
+
+ }
+
+ shadow_linear_pg_table[va>>PAGE_SHIFT] = mk_l1_pgentry(spte);
+ // (we need to do the above even if we've just made the shadow L1)
+
+ } // end of fixup writing the shadow L1 directly failed
+
+ perfc_incrc(shadow_fixup_count);
+
+ return 1; // let's try the faulting instruction again...
+
+}
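+
+/*
+ * NB: shadow_fault() returns 0 when the fault cannot be handled here and
+ * must be propagated to the guest (gpte unreadable, not present, or a
+ * genuine write to a read-only page), and 1 when the shadow has been
+ * fixed up and the faulting instruction should simply be retried.
+ */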
+
+
+void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte,
+ unsigned long *prev_spfn_ptr,
+ l1_pgentry_t **prev_spl1e_ptr )
+{
+    unsigned long gpfn, spfn, spte, prev_spfn = *prev_spfn_ptr;
+    l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr;
+
+    MEM_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, "
+              "prev_spfn=%08lx, prev_spl1e=%p",
+              pa, gpte, prev_spfn, prev_spl1e);
+
+ // to get here, we know the l1 page *must* be shadowed
+
+ gpfn = pa >> PAGE_SHIFT;
+ spfn = frame_table[gpfn].shadow_and_flags & PSH_pfn_mask;
+
+ if ( spfn == prev_spfn )
+ {
+ spl1e = prev_spl1e;
+ }
+ else
+ {
+        if ( prev_spl1e )
+            unmap_domain_mem( prev_spl1e );
+ spl1e = (l1_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
+ *prev_spfn_ptr = spfn;
+ *prev_spl1e_ptr = spl1e;
+ }
+ // XXX we assume only pagetables can be shadowed; this will have to change
+ // to allow arbitrary CoW etc.
+
+ spte = 0;
+
+#if SHADOW_OPTIMISE
+ if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+ (_PAGE_PRESENT|_PAGE_ACCESSED) )
+ {
+        spte = gpte;
+        if ( !(gpte & _PAGE_DIRTY) )
+            spte &= ~_PAGE_RW; // catch the first write, as in shadow_fault()
+ }
+#endif
+
+    spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry( spte );
+
+    // spl1e stays mapped: it is cached via *prev_spl1e_ptr and unmapped
+    // by the caller once its update loop has finished
+}
+
+void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte )
+{
+ unsigned long gpfn, spfn, spte;
+    l2_pgentry_t *spl2e;
+ unsigned long s_sh;
+
+ MEM_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpte=%08lx",pa,gpte);
+
+ // to get here, we know the l2 page has a shadow
+
+ gpfn = pa >> PAGE_SHIFT;
+ spfn = frame_table[gpfn].shadow_and_flags & PSH_pfn_mask;
+
+    spl2e = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
+    // no real need for a prev_* mapping cache here, unlike the L1 case
+
+ spte = 0;
+
+    // don't index frame_table with a bogus pfn if the guest entry
+    // isn't present
+    s_sh = (gpte & _PAGE_PRESENT) ?
+        frame_table[gpte >> PAGE_SHIFT].shadow_and_flags : 0;
+
+    if ( s_sh & PSH_shadowed )
+    {
+        if ( unlikely( (frame_table[gpte>>PAGE_SHIFT].type_and_flags &
+                        PGT_type_mask) == PGT_l2_page_table ) )
+        {
+            // linear page table case: keep pointing at the guest, read-only
+            spte = (gpte & ~_PAGE_RW) | _PAGE_DIRTY | _PAGE_ACCESSED;
+        }
+        else
+            spte = (gpte & ~PAGE_MASK) |
+                ((s_sh & PSH_pfn_mask) << PAGE_SHIFT) |
+                _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+    }
+
+ // XXXX Should mark guest pte as DIRTY and ACCESSED too!!!!!
+
+    spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry( spte );
+
+    unmap_domain_mem( (void *) spl2e );
+}
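+
+/*
+ * Note that shadow_l2_normal_pt_update() forces DIRTY|ACCESSED in the
+ * shadow L2 entries it creates: this is why guests must not rely on L2
+ * A/D bits being accurate (see the note at the top of this file).
+ */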
+
+
+#if SHADOW_DEBUG
+
+static int sh_l2_present;
+static int sh_l1_present;
+char * sh_check_name;
+
+#define FAIL(_f, _a...)                                                 \
+{                                                                       \
+    printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n",               \
+           sh_check_name, level, i , ## _a , gpte, spte );              \
+    BUG();                                                              \
+}
+
+int check_pte( unsigned long gpte, unsigned long spte, int level, int i )
+{
+ unsigned long mask, gpfn, spfn;
+
+ if ( spte == 0 || spte == 0xdeadface || spte == 0x00000E00)
+ return 1; // always safe
+
+ if ( !(spte & _PAGE_PRESENT) )
+ FAIL("Non zero not present spte");
+
+    if ( level == 2 ) sh_l2_present++;
+    if ( level == 1 ) sh_l1_present++;
+
+ if ( !(gpte & _PAGE_PRESENT) )
+ FAIL("Guest not present yet shadow is");
+
+ mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|0xFFFFF000);
+
+ if ( (spte & mask) != (gpte & mask ) )
+ FAIL("Corrupt?");
+
+ if ( (spte & _PAGE_DIRTY ) && !(gpte & _PAGE_DIRTY) )
+ FAIL("Dirty coherence");
+
+ if ( (spte & _PAGE_ACCESSED ) && !(gpte & _PAGE_ACCESSED) )
+ FAIL("Accessed coherence");
+
+ if ( (spte & _PAGE_RW ) && !(gpte & _PAGE_RW) )
+ FAIL("RW coherence");
+
+ if ( (spte & _PAGE_RW ) && !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY) ))
+ FAIL("RW2 coherence");
+
+ spfn = spte>>PAGE_SHIFT;
+ gpfn = gpte>>PAGE_SHIFT;
+
+ if ( gpfn == spfn )
+ {
+ if ( level > 1 )
+ FAIL("Linear map ???"); // XXX this will fail on BSD
+
+#if 0 // might be a RO mapping of a page table page
+ if ( frame_table[gpfn].shadow_and_flags != 0 )
+ {
+ FAIL("Should have been shadowed g.sf=%08lx s.sf=%08lx",
+ frame_table[gpfn].shadow_and_flags,
+ frame_table[spfn].shadow_and_flags);
+ }
+ else
+#endif
+ return 1;
+ }
+ else
+ {
+ if ( level < 2 )
+ FAIL("Shadow in L1 entry?");
+
+ if ( frame_table[gpfn].shadow_and_flags != (PSH_shadowed | spfn) )
+ FAIL("spfn problem g.sf=%08lx s.sf=%08lx [g.sf]=%08lx [s.sf]=%08lx",
+ frame_table[gpfn].shadow_and_flags,
+ frame_table[spfn].shadow_and_flags,
+ frame_table[frame_table[gpfn].shadow_and_flags&PSH_pfn_mask].shadow_and_flags,
+ frame_table[frame_table[spfn].shadow_and_flags&PSH_pfn_mask].shadow_and_flags
+ );
+
+ if ( frame_table[spfn].shadow_and_flags != (PSH_shadow | gpfn) )
+ FAIL("gpfn problem g.sf=%08lx s.sf=%08lx",
+ frame_table[gpfn].shadow_and_flags,
+ frame_table[spfn].shadow_and_flags);
+
+ }
+
+ return 1;
+}
+
+
+int check_l1_table( unsigned long va, unsigned long g2, unsigned long s2 )
+{
+ int j;
+ unsigned long *gpl1e, *spl1e;
+
+ gpl1e = (unsigned long *) &(linear_pg_table[ va>>PAGE_SHIFT]);
+ spl1e = (unsigned long *) &(shadow_linear_pg_table[ va>>PAGE_SHIFT]);
+
+
+ for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
+ {
+ unsigned long gpte = gpl1e[j];
+ unsigned long spte = spl1e[j];
+
+ check_pte( gpte, spte, 1, j );
+ }
+
+ return 1;
+}
+
+#define FAILPT(_f, _a...) \
+{printk("XXX FAILPT" _f "\n", ## _a ); BUG();}
+
+int check_pagetable( pagetable_t pt, char *s )
+{
+ unsigned long gptbase = pagetable_val(pt);
+ unsigned long gpfn, spfn;
+ int i;
+ l2_pgentry_t *gpl2e, *spl2e;
+
+    return 1; // XXX audit disabled for now -- delete this line to enable
+
+ sh_check_name = s;
+
+ MEM_VVLOG("%s-PT Audit",s);
+
+ sh_l2_present = sh_l1_present = 0;
+
+ gpfn = gptbase >> PAGE_SHIFT;
+
+ if ( ! (frame_table[gpfn].shadow_and_flags & PSH_shadowed) )
+ {
+ printk("%s-PT %08lx not shadowed\n", s, gptbase);
+
+        if ( frame_table[gpfn].shadow_and_flags != 0 ) BUG();
+
+ return 0;
+ }
+
+ spfn = frame_table[gpfn].shadow_and_flags & PSH_pfn_mask;
+
+    if ( frame_table[gpfn].shadow_and_flags != (PSH_shadowed | spfn) )
+        FAILPT("ptbase shadow inconsistent1");
+
+    if ( frame_table[spfn].shadow_and_flags != (PSH_shadow | gpfn) )
+        FAILPT("ptbase shadow inconsistent2");
+
+
+ // use the linear map to get a pointer to the L2
+ gpl2e = (l2_pgentry_t *) &(linear_l2_table[0]);
+ spl2e = (l2_pgentry_t *) &(shadow_linear_l2_table[0]);
+
+ // check the whole L2
+ for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
+ {
+ unsigned long gpte = l2_pgentry_val(gpl2e[i]);
+ unsigned long spte = l2_pgentry_val(spl2e[i]);
+
+ check_pte( gpte, spte, 2, i );
+ }
+
+
+ // go back and recurse
+ for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
+ {
+ unsigned long gpte = l2_pgentry_val(gpl2e[i]);
+ unsigned long spte = l2_pgentry_val(spl2e[i]);
+
+ if ( spte )
+ check_l1_table(
+ i<<L2_PAGETABLE_SHIFT,
+ gpte>>PAGE_SHIFT, spte>>PAGE_SHIFT );
+
+ }
+
+
+ MEM_VVLOG("PT verified : l2_present = %d, l1_present = %d\n",
+ sh_l2_present, sh_l1_present );
+
+ return 1;
+}
+
+
+#endif
+
+
+#endif // CONFIG_SHADOW